From 1357edf0dfff84a7d3d59e922eaf9aceaa4f62d0 Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Wed, 21 May 2008 11:07:23 +0100
Subject: [PATCH] Handle IOMMU pagetable allocations when set_p2m_entry is
 called with non-zero page order.

Signed-off-by: Xin Xiaohui
Signed-off-by: Keir Fraser
---
 xen/arch/x86/mm/p2m.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index dcf35fabcb..853ba415d4 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -250,7 +250,7 @@ p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
     // XXX -- this might be able to be faster iff current->domain == d
     mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
     void *table =map_domain_page(mfn_x(table_mfn));
-    unsigned long gfn_remainder = gfn;
+    unsigned long i, gfn_remainder = gfn;
     l1_pgentry_t *p2m_entry;
     l1_pgentry_t entry_content;
     l2_pgentry_t l2e_content;
@@ -328,9 +328,11 @@ p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
     if ( iommu_enabled && is_hvm_domain(d) )
     {
         if ( p2mt == p2m_ram_rw )
-            iommu_map_page(d, gfn, mfn_x(mfn));
+            for ( i = 0; i < (1UL << page_order); i++ )
+                iommu_map_page(d, gfn+i, mfn_x(mfn)+i );
         else
-            iommu_unmap_page(d, gfn);
+            for ( i = 0; i < (1UL << page_order); i++ )
+                iommu_unmap_page(d, gfn+i);
     }
 
     /* Success */
-- 
2.30.2